home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
AmigActive 10
/
AACD 10.iso
/
AACD
/
Magazine
/
Online
/
httpproxy
/
src
/
httpproxy.c
< prev
next >
Wrap
C/C++ Source or Header
|
1996-08-20
|
56KB
|
1,896 lines
/*(( "Header" */
/*
* $Id: httpproxy.c,v 0.22 1996/08/20 17:36:43 mshopf Exp mshopf $
*
* (c) 1995-96 Matthias Hopf
*
* A small little Http Proxy.
* It can serve as a ProxyProxy, too (i.e. it can perform only caching and will
* get its data from another proxy).
* That way it can be used for other protocol types than html, too.
*
* Run it standalone at high priority. It won't need much computing time as
* it does no busy wait at all.
* If you really want to re-get an already cached page, just reload it immediately.
* (no other request in between and no more than ReloadTime seconds delay).
* If you browse offline, you'll get a note that the cache is invalid. Reload the
* page if you want to queue the page and get the old cache.
*/
/*
* $Log: httpproxy.c,v $
* Revision 0.22 1996/08/20 17:36:43 mshopf
* new queuing mode.
* two additional error output messages for the testAmiTCP.rexx script.
* fixed special handling for graphic Urls.
* a bit of cleanup.
* cleaned up template.
* added minnumreq, noqueue, nohttpproxyproxy option.
*
* Revision 0.21 1996/08/12 03:33:36 mshopf
* new FileToReq().
* new IsGfx().
* queued graphic urls dont output 'queued url' message any more
* but the (invalid) expired cache file or the file @msg/queued.gfx.
* using extra space in request_t for urls (logging).
*
* Revision 0.20 1996/08/11 22:25:15 mshopf
* reworked debug messages.
* removed FakeIbrowse switch (finally they made it...).
* debug switch.
*
* Revision 0.19 1996/07/30 13:57:03 mshopf
* adding shutdown / error messages to bottom of page.
* keepbad, proxylocal, ibrowse switches added.
* added queueing.
* small bug fixes.
* no url shutdown queueing yet.
*
* Revision 0.18 1996/07/17 16:42:42 mshopf
* support of new cache system.
*
* Revision 0.17 1996/06/06 23:01:23 mshopf
* cosmetic changes (httpproxy support page).
* added AlwaysReload.
*
* Revision 0.16 1996/06/03 04:08:19 mshopf
* changed strerror handling.
* added timeouts.
* url parsing bug fixes.
* connection close bug fix.
* shutdown url consistency bug fix.
*
* Revision 0.15 1996/04/26 05:14:03 mshopf
* added ExitAll, service scans.
* V0.13 alpha 5 fix.
*
* Revision 0.14 1996/04/24 17:37:14 mshopf
* small bug fixes for as225.
*
* Revision 0.13 1996/04/24 03:20:13 mshopf
* encapsulated network module.
*
* Revision 0.12 1996/02/12 19:23:36 mshopf
* assert() to logfile and debugfile and stderr.
* MaxRequests now variable.
* ReadCacheList()/SaveCacheList().
* lots of bug fixes and cosmetic changes.
*
* Revision 0.11 1996/01/15 22:27:58 mshopf
* fixed AmiTCP4.0 broken time() / stat() times (fix by Fionn Behrens).
* another couple of bug fixes.
* added dynamic cache table allocation.
* error replies still getting better.
* removed host realname lookup.
*
* Revision 0.10 1996/01/09 17:20:35 mshopf
* Three major and some other minor bug fixes.
* Added UrlBuffer for POST method and non-proxyproxy support.
* More and more comformant error messages.
* basic POST support (only proxying, no caching, no queuing).
* Sending data even when request is not completed yet.
* Recreating URL even from nonconformant requests.
* Long requests supported now.
* Some cosmetic changes.
*
* Revision 0.9 1995/12/06 19:53:55 mshopf
* added fixes for unix machines and AmiTCP4.0 compilation.
* some bug fixes.
* more html conform now.
* lots of printf type fixes.
*
* Revision 0.8 1995/12/03 14:20:23 mshopf
* Made auto requests and proxy messages more http conform.
*
* Revision 0.7 1995/11/19 17:57:50 mshopf
* added revbump compatible version string.
* fixed ftp offline proxyproxy std port bug.
*
* Revision 0.6 1995/11/04 11:26:13 mshopf
* better shutdown (requeueing of current transmissions). small bug fixes.
*
* Revision 0.5 1995/11/02 18:26:13 mshopf
* queueing system implemented.
*
* Revision 0.4 1995/10/21 21:28:37 mshopf
* small bug fix.
*
* Revision 0.3 1995/10/17 19:23:09 mshopf
* cleaned up messy logging system.
* new option 'log'.
*
* Revision 0.2 1995/10/13 18:09:17 mshopf
* everything works so far, exceptions are noted in the header.
* :-)
*
* Revision 0.1 1995/10/13 10:44:27 mshopf
* no caching at all right now, but it works fine as a proxyproxy.
*
*/
/*)) */
/*(( "Includes" */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <ctype.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "httpproxy.h"
#include "net.h"
#include "logging.h"
#include "cache.h"
#include "queue.h"
#include "httpproxy_rev.h"
#ifdef _AMIGA
# define strcasecmp stricmp
# define strncasecmp strnicmp
# define CURRENTDIR ""
# ifdef FIXTIME
/* Fix broken AmiTCP4.0 (or SasC+AmiTCP4.0) time() / stat() */
# include <proto/dos.h>
/* Replacement time(): the AmiTCP4.0 (or SasC+AmiTCP4.0) time()/stat() is
 * broken, so derive the Unix time directly from the AmigaDOS DateStamp.
 * Stores the result through TimePtr when non-NULL, like the standard time(). */
time_t time (time_t *TimePtr)
{
    struct DateStamp ds;
    time_t t;
    DateStamp (&ds);
    /* seconds (ds_Tick runs at 50 ticks per second) + minutes*60 +
     * (days + offset from the Amiga epoch to the Unix epoch [2922 days]) * 86400 */
    t = ds.ds_Tick / 50+ds.ds_Minute*60+(ds.ds_Days+2922)*86400;
    if (TimePtr)
        *TimePtr = t;
    return (t);
}
# endif
#else
# define CURRENTDIR "." /* rudimentary unix support (would need work...) */
#endif
/*)) */
/*(( "Global Variables" */
#ifdef DEBUG
int DebugLevel = (1<<D_REQUEST) | (1<<D_CHECK) | (1<<D_SCAN) | (1<<D_QUEUE) |
(1<<D_MSG) | (1<<D_FILES) | (1<<D_ALWAYS);
#endif
/* To be kept conform with testAmiTCP.rexx tests */
#define TEST_NOSERVER "Serversocket not supported by installed TCP/IP-Stack."
#define TEST_NOTCPIP "TCP/IP stack not running."
extern long __oslibversion = 37; /* Minimum: Kick 2.0 */
request_t *Requests;
int OffLine = FALSE; /* 0: get cache files again, when they are too old */
/* 1: keep cache files and queue them */
int GetQueued = FALSE; /* 1: get queued data from remote hosts or proxyproxy */
int AlwaysReload = FALSE; /* 1: reload *all* requests */
int QueueMode = TRUE; /* queue requests not yet in the cache */
struct DateStamp RequestStamp; /* Current time while request is processed */
/* Global variables for httpproxy.c */
static netmethods_t *Net = NULL;
static char *VVersion = VERSTAG;
static char *Version = VERSTRING;
static char *PrgName;
#define TRANSFER_INT "</PRE></TT></TABLE><P><HR>"
static const char *TShutdown = TRANSFER_INT "<H1>Shutdown before finished...</H1><HR><ADDRESS>";
static const char *TReadErr = TRANSFER_INT "<H1>Read error...</H1><HR><ADDRESS>";
static const char *TWriteErr = TRANSFER_INT "<H1>Write error...</H1><HR><ADDRESS>";
static const char *TTimeout = TRANSFER_INT "<H1>Timeout...</H1><HR><ADDRESS>";
static const char *HtmlVersion = "<A HREF=\"http://wwwcip.informatik.uni-erlangen.de/user/mshopf/httpproxy.html\">" VERSTRING
"</A> by <A HREF=\"http://wwwcip.informatik.uni-erlangen.de/user/mshopf/\">Matthias Hopf</A>";
static int ServerPort = DEFAULT_PROXYPORT;
static int ProxyProxy = FALSE; /* TRUE, when all requests (except Http, see below) should be
* forwarded to another proxy. */
static int HttpProxyProxy = FALSE; /* TRUE, when HTTP requests should be forwarded, too. */
static const char *ProxyHost;
static int ProxyPort;
struct DateStamp NextCheckStamp; /* wait stamp for next Timeout check */
static char LastUrl [MAX_URLBUFFER];
static time_t ThisRequestTime = 0;
static time_t StartUpTime;
static int MaxRequests = DEFAULT_MAX_REQUESTS;
static int MinRequests = DEFAULT_MIN_REQUESTS;
static int RequestsFree = -1;
static u_long DelCacheTime = DEFAULT_DELTIME;
static u_long ExpireCacheTime = DEFAULT_EXPIRETIME;
static u_long ReloadCacheTime = DEFAULT_RELOADTIME;
static int CacheUnreadRequests = FALSE; /* 1: keep cache data on data connection close with data in url send buffer */
static int KeepUnfinished = FALSE; /* 1: keep not finished requests in the cache */
static int ProxyLocal = FALSE; /* proxy localhost requests */
static int TimeoutTime = DEFAULT_TIMEOUT; /* seconds to timeout */
void DeleteConnect (request_t *Req, int ok);
/*)) */
/*(( "Init()/ExitAll()" */
/* Init all global variables, open server port */
/* Initialize all global state and open the server port.
 * ProxyProxyHost/ProxyProxyPort select the optional upstream proxy (host may
 * be NULL), LogName names the log file, Argv is the argument vector (logged
 * in DEBUG builds).  Exits the process via ExitAll() on any fatal failure. */
void Init (char *ProxyProxyHost, int ProxyProxyPort, char *LogName, char **Argv)
{
#ifdef DEBUG
    char Buffer [1024]; /* Buffer for startup parameters */
    /* NOTE(review): the sprintf loop below does not bound-check Buffer;
     * an extremely long argument list would overflow it - confirm callers. */
    char *Buf;
#endif
    ProxyHost = ProxyProxyHost;
    ProxyPort = ProxyProxyPort;
    /* Standard setups */
    LogOpenFile (LogName);
#ifdef DEBUG
    assert (*Argv); /* print out startup arguments */
    Buf = Buffer;
    while (*Argv)
    {
        sprintf (Buf, " \"%s\"", *Argv++);
        Buf += strlen (Buf);
    }
    LogSpecial ("Startup: %s\n", Buffer);
#endif
    /* Request table is allocated dynamically; MaxRequests may have been
     * changed by option parsing before Init() runs. */
    if (! (Requests = calloc (MaxRequests, sizeof (request_t))) )
    {
        fprintf (stderr, "not enough memory (need %d Kbyte)\n", NEED_MEM);
        ExitAll (20);
    }
    RequestsFree = MaxRequests;
    ThisRequestTime = StartUpTime = time (NULL);
    DateStamp (&NextCheckStamp);
    RequestStamp = NextCheckStamp;
    /* Create Serversocket */
    if (! Net->server (ServerPort))
    {
        fprintf (stderr, "%s\n", Net->strerror (errno));
        /* stdout message is parsed by testAmiTCP.rexx - keep it stable */
        fprintf (stdout, TEST_NOSERVER "\n");
        ExitAll (10);
    }
    /* Initialize caching system */
    if (! CacheInit ())
    {
        fprintf (stderr, "cache system initialization failed!\n");
        ExitAll (10);
    }
    /* A failing queue system is not fatal - only queueing is unavailable. */
    if (! QueueInit ())
        fprintf (stderr, "queue system initialization failed - continuing\n");
    if (OffLine)
        LogSpecial ("%s: starting type %s offline\n", Version, Net->Descr);
    else if (ProxyProxyHost)
        LogSpecial ("%s: starting type %s online with proxyproxy host '%s', port %d\n",
        Version, Net->Descr, ProxyProxyHost, ProxyProxyPort);
    else
        LogSpecial ("%s: starting type %s online without proxyproxy\n", Version, Net->Descr);
}
/* Close network if necessary, exit with return code */
/* Shut down every subsystem (queue, cache, network, logging) and
 * terminate the process with the given return code.  Never returns. */
void ExitAll (int Ret)
{
    QueueExit ();
    CacheExit ();
    if (Net != NULL)
    {
        Net->exit ();
        Net = NULL;
    }
    LogCloseFile ();
    exit (Ret);
    /* NOTREACHED */
}
/*)) */
/*(( "ErrToReq()" */
/* Type an error to a data buffer of a pending request and set everything up.
* The error is printed into the Logstream, too. When Errno < 0, a information
* message is sent (Short is not sent but used for logging when != NULL). */
/* Format an error (or informational) page into the request's data buffer and
 * mark the request done; any open connection socket is torn down.
 * Number is the HTTP status code, ErrNo selects the flavour:
 *   ErrNo >= 0 : error page; Short is the title, ErrNo > 0 adds the
 *                network strerror() text as cause; logged as L_ERRMSG.
 *   ErrNo <  0 : informational "Proxy Message"; Short is used only for
 *                logging (when non-NULL), Descr is the body text.
 * The HTTP/1.0 status line + headers are emitted only for HTTP/1.0 clients
 * (REQ_HTTP1X0); http/0.9 clients get the bare HTML. */
void ErrToReq (request_t *Req, int Number, int ErrNo, const char *Short, const char *Descr, const char *Url)
{
    Req->DataSent = 0; /* page replaces anything already buffered */
    if (Req->Flags & REQ_HTTP1X0)
    {
        if (ErrNo >= 0)
        {
            LogErr (Req, L_ERRMSG, Url, ErrNo, "%s", Short);
            sprintf (Req->DataBuffer, "HTTP/1.0 %03d %s\r\n"
            "Server: %s\r\n"
            "Content-Type: text/html\r\n\r\n"
            "<HTML><HEAD><TITLE>Proxy Error</TITLE></HEAD>\n"
            "<BODY><H1>Proxy Error: %s</H1><P>\n"
            "%s%s%s"
            "%s<P>\n"
            "<HR><ADDRESS>%s, running on %s</ADDRESS>\n"
            "</BODY></HTML>\n",
            Number, Short, VERSHTTP, Short,
            (ErrNo > 0 ? "<H3>Cause: " : ""),
            (ErrNo > 0 ? Net->strerror (ErrNo) : ""),
            (ErrNo > 0 ? "</H3>\n" : ""),
            Descr, HtmlVersion, Net->HostName);
        }
        else
        {
            if (Short)
                LogErr (Req, L_MSG, Url, 0, "%s", Short);
            sprintf (Req->DataBuffer, "HTTP/1.0 %03d Proxy Message\r\n"
            "Server: %s\r\n"
            "Content-Type: text/html\r\n\r\n"
            "<HTML><HEAD><TITLE>Proxy Message</TITLE></HEAD>\n"
            "<BODY><H3>Proxy Message:</H3><P>\n"
            "%s<P>\n"
            "<HR><ADDRESS>%s, running on %s</ADDRESS>\n"
            "</BODY></HTML>\n",
            Number, VERSHTTP, Descr, HtmlVersion, Net->HostName);
        }
    }
    else
    {
        /* http/0.9 client: same pages without status line and headers */
        if (ErrNo >= 0)
        {
            LogErr (Req, L_ERRMSG, Url, ErrNo, "%s", Short);
            sprintf (Req->DataBuffer, "<HTML><HEAD><TITLE>Proxy Error</TITLE></HEAD>\n"
            "<BODY><H1>Proxy Error: %s</H1><P>\n"
            "%s%s%s"
            "%s<P>\n"
            "<HR><ADDRESS>%s, running on %s</ADDRESS>\n"
            "</BODY></HTML>\n",
            Short,
            (ErrNo > 0 ? "<H3>Cause: " : ""),
            (ErrNo > 0 ? Net->strerror (ErrNo) : ""),
            (ErrNo > 0 ? "</H3>\n" : ""),
            Descr, HtmlVersion, Net->HostName);
        }
        else
        {
            if (Short)
                LogErr (Req, L_MSG, Url, 0, "%s", Short);
            sprintf (Req->DataBuffer, "<HTML><HEAD><TITLE>Proxy Message</TITLE></HEAD>\n"
            "<BODY><H3>Proxy Message:</H3><P>\n"
            "%s<P>\n"
            "<HR><ADDRESS>%s, running on %s</ADDRESS>\n"
            "</BODY></HTML>\n",
            Descr, HtmlVersion, Net->HostName);
        }
    }
    Req->DataRecv = strlen (Req->DataBuffer);
    Req->Flags |= REQ_DONE;
    if (Req->Flags & REQ_CONNSOCKET)
        DeleteConnect (Req, FALSE);
}
/*)) */
/*(( "FileToReq() / ErrToData()" */
/* Type a file into the DataBuffer.
* This file has to be short enough to fit in this buffer. */
/* Copy an internal message file into the request's data buffer and mark
 * the request done.  The file must be small enough to fit the remaining
 * buffer space; if it fills the buffer completely a truncation warning
 * is logged. */
void FileToReq (request_t *Req, const char *File)
{
    FILE *fp;
    int space, got;
    Req->Flags |= REQ_DONE;
    debug (D_MSG, ("%02d: sending internal file '%s'\n", Req-Requests, File));
    fp = fopen (File, "r");
    if (fp == NULL)
    {
        LogErr (Req, L_ERRMSG, NULL, errno, "on opening internal file '%s'", File);
        return;
    }
    space = MAX_DATABUFFER - Req->DataRecv;
    got = fread (& Req->DataBuffer [Req->DataRecv], 1, space, fp);
    if (got == space)
        LogErr (Req, L_INFO, NULL, 0, "internal file '%s' too long - truncated", File);
    Req->DataRecv += got;
    fclose (fp);
}
/* Type an error to the ending of a data buffer of a working request.
* The error is not printed into the Logstream. Total maximum size of
* error messages is RESERVED_DATABUFFER. When KeepCache is true,
* the error is written to the cache file, too. */
/* Append an error message plus the HtmlVersion footer to the end of a
 * working request's data stream.  Nothing is logged here.  Total size of
 * such messages must stay within RESERVED_DATABUFFER.  With KeepCache
 * true the message is also written into the cache file. */
void ErrToData (request_t *Req, const char *Err, int KeepCache)
{
    size_t ErrLen = strlen (Err);
    size_t VersLen = strlen (HtmlVersion);
    debug (D_MSG, ("%02d: adding error message to request: '%s'\n", Req-Requests, Err));
    if (KeepCache)
    {
        CacheWrite (& Req->Cache, Err, ErrLen);
        CacheWrite (& Req->Cache, HtmlVersion, VersLen);
    }
    if (Req->Flags & REQ_REQSOCKET)
    {
        assert (Req->DataRecv + ErrLen + VersLen < MAX_DATABUFFER);
        /* copy including the terminating NUL, like strcpy did */
        memcpy (& Req->DataBuffer [Req->DataRecv], Err, ErrLen + 1);
        Req->DataRecv += ErrLen;
        memcpy (& Req->DataBuffer [Req->DataRecv], HtmlVersion, VersLen + 1);
        Req->DataRecv += VersLen;
    }
}
/*)) */
/*(( "CheckCacheTime()" */
/* Check the modification time and date of a cache entry; set Time to 0 to force expire. */
/* only to be called when cache file exists. */
/* Returns -1, when the cache entry is expired or to be reloaded.
* Returns -2, when the cache entry is expired and queued. */
/* Check the modification time of an existing cache entry against the
 * current request time; pass Time == 0 to force expiry.
 * Returns  0 : entry is valid (or valid but queued in non-queue mode),
 *         -1 : entry is expired / must be reloaded,
 *         -2 : entry is expired and has been (or already was) queued. */
int CheckCacheTime (cachefile_t *c, const char *Url, u_long Time)
{
    time_t FileT = -1; /* -1 marks "no usable timestamp" below */
    struct stat s;
    /* TODO: ask the cache system instead of stat()ing the file directly */
    if (stat (c->FileName, &s) >= 0)
    {
        FileT = s.st_mtime;
        debug (D_CHECK, ("local 0x%lx, access 0x%lx -> %s\n", ThisRequestTime, FileT,
        difftime (ThisRequestTime, FileT) > Time ? "expired" : "valid"));
        if ((! Time) || difftime (ThisRequestTime, FileT) > Time)
        {
            /* entry is too old (or expiry was forced) */
            if (OffLine)
            {
                if (QueueMode || (! Time))
                {
                    QueueQueue (Url);
                    dreturn (D_CHECK, -2);
                }
                /* else: go on */
            }
            else if (QueueCheck (Url)) /* Is the entry queued and not yet sent? */
                dreturn (D_CHECK, OffLine ? -2 : -1);
            else
                FileT = -1; /* fall through to the reload path below */
        }
    }
    if (FileT == -1)
    {
        if (QueueCheck (Url))
        {
            /* queued but no cache file present - inconsistent state */
            debug (D_ALWAYS, ("cache entry '%s' not yet there (queued) - This should not happen...\n", c->File));
            fprintf (stderr, "%s: Warning! Data consistency failure, line %d\n", PrgName, __LINE__);
            dreturn (D_CHECK, -2);
        }
        else
        {
            debug (D_CHECK, ("cache entry '%s' expired / to be reloaded, removing\n", c->File));
            dreturn (D_CHECK, -1);
        }
    }
    /* valid entry; still report -2 when it is queued in queue mode */
    dreturn (D_CHECK, (QueueMode && QueueCheck (Url)) ? -2 : 0);
}
/*)) */
/*(( "IsGfx()" */
/* test whether a specific Url is a graphic */
/* Test whether a URL names a graphic, judged by its file suffix
 * (case-insensitive).  Returns 1 for a graphic, 0 otherwise.
 * Fix: the previous version indexed &Url[len-4] / &Url[len-5] without
 * checking the length first, reading before the start of the string
 * (undefined behavior) for URLs shorter than the suffix. */
int IsGfx (const char *Url)
{
    /* recognized graphic suffixes */
    static const char * const Suffix [] = { ".gif", ".jpg", ".jpeg", ".iff", ".png", ".xbm" };
    size_t len = strlen (Url);
    int i;
    for (i = 0; i < (int) (sizeof Suffix / sizeof Suffix [0]); i++)
    {
        size_t slen = strlen (Suffix [i]);
        if (len >= slen && strcasecmp (& Url [len - slen], Suffix [i]) == 0)
            return 1;
    }
    return 0;
}
/*)) */
/*(( "ScanCache()" */
/* Scan whether a specific url is already cached, filled and ready to serve */
/* Returns 0: no, not there 1: yes, all set up -1: Failure
* 2: error message in buffer */
/* Sets up the request struct for correct filling, opens the cache, etc. */
/* Scan whether a specific url is already cached, filled and ready to serve.
 * Returns 0: not cached (caller must fetch; REQ_NEWCACHE set when a new
 *            cache file was opened),
 *         1: served from cache (cache file open, REQ_DONE set),
 *        -1: failure,
 *         2: an error / info message was placed in the data buffer.
 * A "reload request" is detected when the same URL arrives twice within
 * ReloadCacheTime seconds with no other request in between (LastUrl). */
int ScanCache (request_t *Req, char *Url)
{
    static time_t LastRequestTime; /* time of the previous request */
    cachefile_t *c = & Req->Cache;
    LastRequestTime = ThisRequestTime;
    ThisRequestTime = time (NULL);
    debug (D_CHECK, ("Url '%s' - reload request: %s\n", Url, difftime (ThisRequestTime, LastRequestTime) < ReloadCacheTime ? "yes" : "no"));
    switch (CacheGet (c, Url, Req)) {
    case 1: /****** found valid cache file */
        switch (CheckCacheTime (c, Url, ExpireCacheTime)) {
        case 0: /* cache entry is current */
            if (AlwaysReload || (strcmp (LastUrl, Url) == 0 && difftime (ThisRequestTime, LastRequestTime) < ReloadCacheTime))
            {
                if (CheckCacheTime (c, Url, 0) == -2) /* force queueing / reloading */
                {
                    ErrToReq (Req, 202, -1, NULL, "Your request for reloading the document is queued.<BR>\n"
                    "You will get the new document next time you are online.<BR>\n"
                    "An expired cache entry exists and can be viewed by immedeately reloading this document.",
                    NULL);
                    dreturn (D_CHECK, 2);
                }
                break; /* forcing reload of url */
            }
            strcpy (LastUrl, Url);
            if (CacheOpenOld (c) == -1)
            {
                LogErr (Req, L_WARN, Url, errno, "cannot open cache file '%s', removing", c->FileName);
                CacheRemove (c->FileName, TRUE);
            }
            else
            {
                /* serve directly from the cache */
                Req->Flags |= REQ_DONE;
                dreturn (D_CHECK, 1);
            }
            break;
        /* case -1: break; */
        case -2: /* expired and queued */
            if (strcmp (LastUrl, Url) == 0 && difftime (ThisRequestTime, LastRequestTime) < ReloadCacheTime)
            {
                /* reload request: hand out the expired copy anyway */
                debug (D_CHECK, ("Sending (invalid) cache file '%s' on request\n", c->File));
                if (CacheOpenOld (c) == -1)
                {
                    LogErr (Req, L_WARN, Url, errno, "cannot open cache file '%s', removing", c->FileName);
                    CacheRemove (c->FileName, TRUE);
                }
                else
                {
                    Req->Flags |= REQ_DONE;
                    dreturn (D_CHECK, 1);
                }
            }
            else
            {
                strcpy (LastUrl, Url);
                if (OffLine)
                {
                    if (IsGfx (Url))
                    {
                        /* don't output queueing information for graphics,
                         * but queue the request nevertheless */
                        debug (D_CHECK, ("Sending (invalid) queued graphics cache file '%s'\n", c->File));
                        if (CacheOpenOld (c) == -1)
                        {
                            LogErr (Req, L_WARN, Url, errno, "cannot open cache file '%s', removing", c->FileName);
                            CacheRemove (c->FileName, TRUE);
                        }
                        else
                        {
                            Req->Flags |= REQ_DONE;
                            dreturn (D_CHECK, 1);
                        }
                    }
                    ErrToReq (Req, 202, -1, NULL, "Your request is queued.<BR>\n"
                    "You will get the document next time you are online.<BR>\n"
                    "An expired cache entry exists and can be viewed by immedeately reloading this document.",
                    NULL);
                    dreturn (D_CHECK, 2);
                }
                else
                    debug (D_CHECK, ("unqueueing and getting Cache entry\n"));
                break;
            }
        }
        break;
    case 0: /****** no valid cache file */
        if (QueueCheck (Url))
        {
            if (OffLine)
            {
                if (strcmp (LastUrl, Url) == 0 && difftime (ThisRequestTime, LastRequestTime) < ReloadCacheTime)
                {
                    ErrToReq (Req, 404, 0, "Already queued", "Your request is already queued.<BR>\n"
                    "You will get the document next time you are online.<BR>\n"
                    "You tried to get an expired cache entry, but this document is not cached right now.",
                    NULL);
                    dreturn (D_CHECK, 2);
                }
                else
                {
                    strcpy (LastUrl, Url);
                    if (IsGfx (Url))
                        FileToReq (Req, "@msg/queued.gfx");
                    else
                        ErrToReq (Req, 202, -1, NULL, "Your request is already queued.<BR>\n"
                        "You will get the document next time you are online.<BR>\n"
                        "There is no expired cache entry for this document.",
                        NULL);
                    dreturn (D_CHECK, 2);
                }
            }
            else
                debug (D_CHECK, ("unqueueing and getting Cache entry\n"));
        }
        else
            debug (D_CHECK, ("new cache entry\n"));
        break;
    case -1: /****** internal error */
        if (OffLine)
        {
            ErrToReq (Req, 500, 0, "Queueing not possible", "Your request can not be queued or served.<BR>\n"
            "While you tried to get an document " PRG_NAME " determined either an internal error or "
            "a doublicate hash entry. These doublicate hash entries can not be cashed.",
            NULL);
            dreturn (D_CHECK, 2);
        }
        dreturn (D_CHECK, 0); /* just proxy it */
    }
    /* not served from cache: either queue (offline) or open a new entry */
    if (OffLine)
    {
        if (QueueMode)
        {
            QueueQueue (Url);
            if (IsGfx (Url))
                FileToReq (Req, "@msg/queued.gfx");
            else
                ErrToReq (Req, 202, -1, NULL, "Your new request is queued.<BR>\n"
                "You will get the document next time you are online.", NULL);
            dreturn (D_CHECK, 2);
        }
        if (IsGfx (Url))
        {
            Req->Flags |= REQ_DONE;
            dreturn (D_CHECK, 2); /* send an empty file... */
        }
        ErrToReq (Req, 202, -1, NULL, "No cache available.<BR>\n"
        "There is no cache for the requested document available.<BR>\n"
        "You may queue the document to get it next time you are online by immedeately reloading this document.",
        NULL);
        dreturn (D_CHECK, 2);
    }
    if (CacheOpenNew (c, Url) == -1)
        dreturn (D_CHECK, 0); /* just proxy it */
    Req->Flags |= REQ_NEWCACHE;
    dreturn (D_CHECK, 0);
}
/*)) */
/*(( "GetCacheData ()" */
/* Fill request data space with data from cache
* when a read error occures, no data will be received and ServWrite()
* will automagically terminate the socket. */
/* Fill the request's data buffer with the next chunk from its cache file.
 * On a read error nothing is received and ServWrite() will terminate the
 * socket automatically. */
void GetCacheData (request_t *Req)
{
    int Got;
    /* cached replies never need the RESERVED_DATABUFFER slack */
    assert (Req->DataRecv < MAX_DATABUFFER);
    debug (D_IO, ("Getting more cache data - Url done: %s\n", Req->Flags & REQ_REQDONE ? "yes" : "no"));
    Got = CacheRead (& Req->Cache, & Req->DataBuffer [Req->DataRecv],
                     MAX_DATABUFFER - Req->DataRecv);
    if (Got > 0)
        Req->DataRecv += Got;
    else if (Got < 0)
        LogErr (Req, L_ERROR, NULL, -1, "read error on cache file '%s'", Req->Cache.FileName);
    debug (D_IO, ("Read %d bytes\n", Got));
}
/*)) */
/*(( "BuildFdSets ()" */
/* Build fdsets (outstanding reads and writes) */
/* Build the fd sets for the next select round: register every request
 * socket that still wants to read or has buffered data to write, and every
 * connection socket that is connecting/sending or has buffer space left.
 * The server socket itself is registered by Net->initfd() only while free
 * request slots exist. */
void BuildFdSets (void)
{
    int i;
    request_t *Req;
    assert (RequestsFree >= 0);
    debug (D_IO, ("Build: "));
    Net->initfd (RequestsFree > 0);
    for (i=0, Req=Requests; i < MaxRequests; i++, Req++)
    {
        if (Req->Flags & REQ_REQSOCKET)
        {
            assert (Req->ReqSocket != 0);
            /* room left in the request buffer -> keep reading the client */
            if (Req->ReqRecv < MAX_REQBUFFER-1)
            {
                debugraw (D_IO, (", Read Req %d", Req-Requests));
                Net->setfdread (Req->ReqSocket);
            }
            /* unsent data buffered -> client socket is writable-interesting */
            if (Req->DataRecv > Req->DataSent)
            {
                debugraw (D_IO, (", Write Req %d", Req-Requests));
                Net->setfdwrite (Req->ReqSocket);
            }
        }
        if (Req->Flags & REQ_CONNSOCKET)
        {
            assert (Req->ConnSocket != 0);
            if (Req->ReqRecv > Req->ReqSent || Req->UrlRecv > Req->UrlSent) /* Waiting for connect or sending request */
            {
                debugraw (D_IO, (", Write Con %d", Req-Requests));
                Net->setfdwrite (Req->ConnSocket);
            }
            if (Req->DataRecv < MAX_DATABUFFER - RESERVED_DATABUFFER) /* Transfer Data */
            {
                debugraw (D_IO, (", Read Con %d", Req-Requests));
                Net->setfdread (Req->ConnSocket);
            }
        }
    }
    debugraw (D_IO, ("\n"));
}
/*)) */
/*(( "CreateUrlRequest()/CheckUrl()" */
/* Create Url request according to specification */
/* Returns the length of the request. */
/* Prot and Host may be NULL, in that case Name is to be asumed to be a full URL.
* No checks for Proxyproxy mode are done in this case. */
/* Create a URL request line ("METHOD object[ HTTP/1.0]\r\n") in ReqSpace
 * according to the proxy configuration and return its length.
 * Prot and Host may be NULL; then Name is assumed to be a full URL already
 * and no proxyproxy checks are done.  Characters that are not valid in an
 * http URL are %-escaped; an existing '%' is passed through unless
 * ExpandProcent is set.
 * Fix: the escape sprintf now casts the character to unsigned char.  With
 * a signed plain char, bytes >= 0x80 were sign-extended so "%02x" printed
 * up to eight hex digits while only three bytes were accounted for,
 * corrupting the buffer (CERT STR34-C). */
size_t CreateUrlRequest (const char *Method, const char *Prot, const char *Host, int Port, const char *Name, char *ReqSpace, int HttpOne, int ExpandProcent)
{
    char Buffer [MAX_URLBUFFER];
    char *p, *DokPrint;
    if (Prot && Host)
    {
        /* when forwarding to another proxy the full URL must be sent,
         * otherwise only the absolute path */
        if ( (strcmp (Prot, "http") == 0) ? HttpProxyProxy : ProxyProxy)
        {
            if (Port > -1)
                sprintf (ReqSpace, "%s %s://%s:%d/", Method, Prot, Host, Port);
            else
                sprintf (ReqSpace, "%s %s://%s/", Method, Prot, Host);
        }
        else
            sprintf (ReqSpace, "%s /", Method);
    }
    else
        sprintf (ReqSpace, "%s ", Method);
    DokPrint = ReqSpace + strlen (ReqSpace);
    /* copy the object name, %-escaping invalid characters */
    for (p = Buffer; *Name; )
        if (isvalidhttp (*Name) || (*Name == '%' && ! ExpandProcent))
            *p++ = *Name++;
        else
        {
            sprintf (p, "%%%02x", (unsigned char) *Name++);
            p += 3;
        }
    *p = '\0';
    debug (D_CHECK, ("Request '%s' created - HTTP1.0:%s\n", ReqSpace, HttpOne ? "yes" : "no"));
    if (HttpOne)
        sprintf (DokPrint, "%s HTTP/1.0\r\n", Buffer);
    else
        sprintf (DokPrint, "%s\r\n", Buffer);
    return (strlen (ReqSpace));
}
/* Check whether the URL is read completely. Sets REQ_REQDONE accordingly.
* Attention! This routine relys on the fact, that ScanUrl was called already!
* That means especially, that the first line was already read completely. */
/* The routine does not check for Url termination on Buffer boundaries. So
* the ReqBuffer has to be shifted always (four bytes remaining always) and
* never be cleared at all. */
/* Check whether the URL request is read completely; sets REQ_REQDONE.
 * Relies on ScanUrl() having run already (first line fully read).
 * An HTTP/1.0 request ends with an empty line (any of the four common
 * line-ending combinations); an http/0.9 request is its first line.
 * The routine does not check for termination on buffer boundaries, so
 * the ReqBuffer must always be shifted (four bytes kept) and never be
 * cleared. */
void CheckUrl (request_t *Req)
{
    debug (D_IO, ("Checking Url\n"));
    if (Req->Flags & REQ_HTTP1X0)
    {
        if (strstr (Req->ReqBuffer, "\n\n") || strstr (Req->ReqBuffer, "\r\r") ||
        strstr (Req->ReqBuffer, "\n\r\n\r") || strstr (Req->ReqBuffer, "\r\n\r\n"))
            Req->Flags |= REQ_REQDONE;
    }
    else
        Req->Flags |= REQ_REQDONE;
#ifdef DEBUG
    if (Req->Flags & REQ_REQDONE)
        debug (D_IO, ("http request complete\n"));
#endif
}
/*)) */
/*(( "ScanUrl ()" */
/* Scan the URL and divide it into several parts. Understands http/0.9
* and http/1.0 versions right now. Sets REQ_REQDONE when URL is complete. */
/* Req->UrlBuffer is valid after calling this routine and REQ_URLDONE set. */
/* Returns -1 in case of failure, 0 on correct termination or found cache,
* 1: found cache slot, FileD is open, 2: failure, cache contains error. */
/* Port and Address are value-return arguments. */
/* Scan the URL in the request buffer and divide it into its parts.
 * Understands http/0.9 and http/1.0 requests; sets REQ_REQDONE when the
 * request is complete and REQ_URLDONE when Req->UrlBuffer is valid.
 * Address (client-visible host) and Port are value-return arguments;
 * SaveUrl/SaveProt are set to static buffers holding the canonical URL
 * and the protocol name.
 * Returns -1 on failure, 0 on correct termination or cache miss (proxy it),
 *          1 when a cache file is open and ready, 2 when the buffer already
 *          contains an error/info message. */
int ScanUrl (request_t *Req, char *Address, int *Port, const char **SaveUrl, const char **SaveProt)
{
    static char BufDOK [MAX_URLBUFFER +256]; /* a critical array... but that's enough */
    static char BufPROT [16];
    char BufMETH [12];
    char BufURL [1024];
    char BufVERS [8];
    int CharsRead, CharsRead2, i;
    char *DokPtr, *PortPtr;
    char *Host, *ObjectName;
    /* "METHOD proto" - both tokens must be present */
    if (sscanf (Req->ReqBuffer, "%11s %15[a-zA-Z0-9]%n", BufMETH, BufPROT, &CharsRead) < 2)
        return (-1);
    debug (D_SCAN, ("METHOD '%s', PROT '%s'\n", BufMETH, BufPROT));
    if (CharsRead >= Req->ReqRecv)
        return (-1);
    if (strncmp (& Req->ReqBuffer [CharsRead], "://", 3) != 0)
        return (-1);
    if (sscanf (& Req->ReqBuffer [CharsRead+3], "%1022s%n", BufURL, &CharsRead2) < 1)
        return (-1);
    debug (D_SCAN, ("URL '%s'\n", BufURL));
    CharsRead += 3 + CharsRead2;
    /* split host[:port] from the object name */
    if (! (DokPtr = strchr (BufURL, '/')) )
        DokPtr = BufURL + strlen (BufURL); /* empty Objectname (Root) */
    else
        *DokPtr++ = 0;
    /* default port: http gets the standard port, others get -1 (none) */
    if (strcasecmp (BufPROT, "http") != 0)
        *Port = -1;
    else
        *Port = Net->StdHttpPort;
    if ( (PortPtr = strchr (BufURL, ':')) )
    {
        *PortPtr = 0;
        *Port = atoi (PortPtr + 1);
    }
    Host = BufURL;
    strncpy (Address, BufURL, MAX_HOSTNAMELEN-1);
    Address [MAX_HOSTNAMELEN-1] = '\0';
    BufVERS [2] = 0; /* BufVERS is misused as a 2-char hex buffer here... */
    /* build the canonical URL "proto://host[:port]/object" in BufDOK */
    if (*Port > -1)
        sprintf (BufDOK, "%s://%s:%d/", BufPROT, Host, *Port);
    else
        sprintf (BufDOK, "%s://%s/", BufPROT, Host);
    ObjectName = BufDOK + strlen (BufDOK);
    /* copy the object name, decoding %xx escapes (invalid escapes -> '?') */
    for (PortPtr = ObjectName; *DokPtr; DokPtr++, PortPtr++)
    {
        if (*DokPtr == '%')
        {
            if (! (BufVERS [0] = *++DokPtr) )
                break;
            if (! (BufVERS [1] = *++DokPtr) )
                break;
            if ( (i = strtol (BufVERS, NULL, 0x10)) )
                *PortPtr = i;
            else
                *PortPtr = '?';
        }
        else
            *PortPtr = *DokPtr;
    }
    *PortPtr = 0;
    *SaveUrl = BufDOK;
    *SaveProt = BufPROT;
    debug (D_SCAN, ("Proto '%s', Host '%s', Port %d, Dok '%s'\n", BufPROT, Host, *Port, BufDOK));
    if (OffLine && strcasecmp (BufMETH, "get") != 0) /* no get command */
        return (-1);
    if (! ProxyProxy)
        if (strcasecmp (BufPROT, "http") != 0) /* we only support http urls so far */
            return (-1);
    /* detect an "HTTP/x.y" version tag after the URL */
    if (Req->ReqBuffer [CharsRead] == ' ' && CharsRead < Req->ReqRecv)
        if (sscanf (& Req->ReqBuffer [CharsRead], " %5s", BufVERS) == 1)
            if (strcasecmp (BufVERS, "http/") == 0)
            {
                debug (D_SCAN, ("got HTTP/1.0 or greater request\n"));
                Req->Flags |= REQ_HTTP1X0;
            }
    CheckUrl (Req);
    Req->Flags |= REQ_URLDONE;
    strncpy (Req->Url, BufDOK, MAX_URLSAVE-1);
    Req->Url [MAX_URLSAVE-1] = '\0';
    if (strlen (BufDOK) > MAX_URLSAVE-1) /* That one cannot be cached */
    {
        LogErr (Req, L_INFO, BufDOK, 0, "ReqBuffer size (%d chars) exceeded - document is not cached", MAX_URLSAVE-1);
        strncpy (Req->Url, BufDOK, MAX_URLSAVE-1);
        Req->Url [MAX_URLSAVE-1] = '\0';
        return (0);
    }
    /* check for service URLs (only allowed from the local machine) */
    if (strcasecmp (Host, "proxy...") == 0)
    {
        if (strcasecmp (Req->Address, "localhost") == 0 || strcmp (Req->Address, "127.0.0.1") == 0)
            return (ScanService (Req, ObjectName));
        else
        {
            ErrToReq (Req, 403, -1, "Service URLs are not allowed", "You are not allowed to request service URLs"
            "from other hosts than the one httpproxy is started on.",
            BufDOK);
            LogErr (Req, L_INFO, BufDOK, 0, "Service URL tried from '%s'", Req->Address);
            return (2);
        }
    }
    /* create UrlBuffer entry */
    assert (Req->UrlSent == 0);
    Req->UrlRecv = CreateUrlRequest (BufMETH, BufPROT, Host, *Port, ObjectName, Req->UrlBuffer, Req->Flags & REQ_HTTP1X0, FALSE);
    /* skip past the first request line (PortPtr and DokPtr are misused here) */
    PortPtr = strchr (& Req->ReqBuffer [CharsRead], '\n');
    DokPtr = strchr (& Req->ReqBuffer [CharsRead], '\r');
    assert (DokPtr == NULL || PortPtr == NULL || PortPtr == DokPtr + 1 || DokPtr == PortPtr + 1);
    if (DokPtr == PortPtr + 1 || PortPtr == NULL)
        PortPtr = DokPtr;
    assert (PortPtr != NULL);
    Req->ReqSent = PortPtr - Req->ReqBuffer + 1;
    if (ProxyLocal)
        if (strcasecmp (Host, "localhost") == 0 || strncasecmp (Host, Net->HostName, strlen (Host)) == 0)
            return (0); /* proxy localhost requests */
    if (strcasecmp (BufMETH, "get") != 0) /* only get's will be cached */
    {
        debug (D_ALWAYS, ("no get method - no caching\n"));
        return (0);
    }
    return (ScanCache (Req, BufDOK));
}
/*)) */
/*(( "DeleteRequest/Connect ()" */
/* Close the connection socket only. When a request socket is still open
* and no data is in the cache, close all.
* Delete cache entry in case of an error. Check the answer whether it is
* an error message or a regular answer. Errors should not be cached! */
/* When Flags == 0 nothing is done at all */
/* Close the connection (remote/upstream) socket of a request.  When a
 * request socket is still open and nothing is left to send, close that too.
 * A newly created cache entry is only kept when ok is true - error answers
 * must not be cached.  When Flags == 0 nothing is done at all.
 * Frees the request slot (RequestsFree++) once no pending work remains. */
void DeleteConnect (request_t *Req, int ok)
{
    int CacheOk;
    /* only a cache file created for THIS transfer depends on ok */
    CacheOk = (Req->Flags & REQ_NEWCACHE) ? ok : TRUE;
    debug (D_REQUEST, ("%02d: DeleteCon Flags 0x%x (ok:%s cacheok:%s)\n", Req-Requests, Req->Flags, ok ? "yes" : "no", CacheOk ? "yes" : "no"));
    if (! Req->Flags)
        return;
    if (Req->Flags & (REQ_CONNSOCKET | REQ_CONNERR))
    {
        if (Req->Flags & REQ_CONNSOCKET)
            Net->close (Req->ConnSocket);
        Req->Flags &= ~(REQ_CONNSOCKET | REQ_CONNERR);
        if (ok)
        {
            if (Req->Flags & REQ_NEWCACHE)
                LogStd (Req, "new");
            else
                LogStd (Req, "proxied");
        }
    }
    LogStd (Req, NULL); /* There really *should* be a log entry */
    if (Req->Flags & REQ_REQSOCKET)
    {
        if (Req->DataRecv <= Req->DataSent && (Req->Flags & REQ_REQDONE))
        {
            Net->close (Req->ReqSocket); /* All done. Hugh! */
            Req->Flags = 0;
        }
        else
            Req->Flags |= REQ_DONE; /* still data to flush to the client */
    }
    else
        Req->Flags = 0;
    CacheClose (& Req->Cache, CacheOk);
    if (! (Req->Flags & REQ_PENDINGMASK))
    {
        Req->Flags = 0;
        RequestsFree++; /* slot is free again */
    }
}
}
/* Close the request socket only. When a connection socket is already up
* (and enough data is already read) continue filling up the cache. When
* everything is done, close all. The URL should be checked, too.
* When it is not complete, all connections should be terminated
* on CacheUnreadRequests false. */
/* Don't call DeleteConnect when Flags == 0... */
/* Close the request (client) socket of a request.  When a connection socket
 * is still up and CacheUnreadRequests is set, keep filling the cache; when
 * everything is done, close all.  Must not be called with Flags == 0. */
void DeleteRequest (request_t *Req, int ok)
{
    int CacheOk;
    /* only a cache file created for THIS transfer depends on ok */
    CacheOk = (Req->Flags & REQ_NEWCACHE) ? ok : TRUE;
    debug (D_REQUEST, ("%02d: DeleteReq Flags 0x%x (ok:%s cacheok:%s)\n", Req-Requests, Req->Flags, ok ? "yes" : "no", CacheOk ? "yes" : "no"));
    if (Req->Flags & REQ_REQSOCKET)
    {
        Net->close (Req->ReqSocket);
        Req->Flags &= ~REQ_REQSOCKET;
    }
    if (Req->Flags & REQ_DONE) /* Maybe we're getting data from the cache */
    {
        if (QueueCheck (Req->Url))
            LogStd (Req, "queued");
        else
            LogStd (Req, "cached");
        CacheClose (& Req->Cache, CacheOk);
        DeleteConnect (Req, ok);
        debug (D_REQUEST, ("All done - deleterequest\n"));
    }
    else if (Req->Flags & REQ_REQDONE)
    {
        if (! (Req->Flags & REQ_CONNSOCKET)) /* Aborting cache sending */
            DeleteConnect (Req, FALSE);
        else if (CacheUnreadRequests)
            Req->DataSent = Req->DataRecv = 0; /* Continue getting data into the cache */
        else
            DeleteConnect (Req, FALSE); /* Remove connection */
    }
    else
    {
        /* URL never completed - abort everything, don't keep the cache */
        Req->Flags |= REQ_DONE;
        DeleteConnect (Req, FALSE); /* No request known so far... */
        CacheClose (& Req->Cache, CacheOk);
    }
}
/*)) */
/*(( "ServConnect ()" */
/* The first line of the URL is already there, so we can connect to the
* remote host. */
/* The first line of the URL has arrived, so the request can be classified
 * and - when not served from the cache - a connection to the remote host
 * (or the upstream proxy) can be opened.  Generates an error page into the
 * request on any failure; sets REQ_CONNSOCKET on a successful connect. */
void ServConnect (request_t *Req)
{
    char Host [MAX_HOSTNAMELEN];
    int Port, UseProxy = FALSE;
    const char *Url = NULL; /* only for logging */
    const char *Proto = NULL; /* only for proxyproxy check */
    debug (D_REQUEST, ("%02d: ServCon for Req; Flags 0x%x\n", Req-Requests, Req->Flags));
    if (Req->Flags & REQ_REQSOCKET)
    {
        if (strchr (Req->ReqBuffer, '\n') == 0 &&
        strchr (Req->ReqBuffer, '\r') == 0) /* first line of URL is not there... */
            return;
        if (Req->Flags & REQ_URLDONE) /* already set up */
        {
            CheckUrl (Req);
            return;
        }
    }
    switch (ScanUrl (Req, Host, &Port, &Url, &Proto)) {
    case 0: /* need to get document from remote host */
#ifdef DEBUG
        if (! (Req->Flags & REQ_NEWCACHE))
            debug (D_REQUEST, ("%02d: serving uncacheable request '%s'\n",
            Req-Requests, Req->ReqBuffer));
#endif
        break;
    case 1: /* already cached - no need to connect to remote host */
        GetCacheData (Req);
        return;
    case 2: /* send error message from buffer */
        return;
    default: /* -1: malformed or unsupported request */
    {
        Req->Flags |= REQ_DONE | REQ_REQDONE;
        ErrToReq (Req, 400, 0, "Invalid Request", "Your Request is not a valid URL.<BR>\n"
        "Please check it and try again. Perhaps you tried to queue a page not using the 'GET' method\n"
        "or you tried to get a non-HTTP: URL without proxyproxy.",
        Url);
        return;
    }
    }
    /* http may be forwarded separately from the other protocols */
    UseProxy = (strcmp (Proto, "http") == 0) ? HttpProxyProxy : ProxyProxy;
    if (OffLine)
    {
        /* reaching this point offline means the URL was neither cached
         * nor queueable */
        ErrToReq (Req, 500, 0, "Unable to serv", "It is not possible to connect to a remote host in offline mode.<BR>\n"
        "Either the requested URL is to long to be queued or an internal error has occured. When the\n"
        "URL is rather short, please contact the author of " PRG_NAME,
        Url);
        return;
    }
    if (UseProxy)
    {
        if ( (Req->ConnSocket = Net->open (ProxyHost, ProxyPort)) < 0)
        {
            Req->Flags |= REQ_CONNERR;
            ErrToReq (Req, 404, errno, "Proxyproxy Host Unreachable / read() failed",
            "The host you specified with the 'proxy' option can't be contacted.<BR>\n"
            "Please wait until the proxy is up again or start " PRG_NAME " in offline mode.",
            NULL);
            return;
        }
    }
    else
    {
        if ( (Req->ConnSocket = Net->open (Host, Port)) < 0)
        {
            Req->Flags |= REQ_CONNERR;
            ErrToReq (Req, 404, errno, "Host Unreachable / read() failed",
            "The specified host can't be contacted.<BR>\n"
            "Please wait until it is up again or try any other URLs.",
            NULL);
            return;
        }
    }
    Req->Flags |= REQ_CONNSOCKET;
    debug (D_REQUEST, ("ServCon -> Conn %d\n", (int) Req->ConnSocket));
}
/*)) */
/*(( "ServServer ()" */
/* Accept a new client connection on the server port and claim a free slot
 * in the request table for it.  The caller guarantees a slot is available. */
void ServServer (int Peer, const char *PeerName)
{
    request_t *Slot;

    assert (RequestsFree > 0);
    DateStamp (&RequestStamp);
    /* A slot is free exactly when none of its flag bits are set. */
    for (Slot = Requests; Slot->Flags; Slot++)
        ;
    assert (Slot - Requests < MaxRequests);
    strncpy (Slot->Address, PeerName, MAX_HOSTNAMELEN-1);
    Slot->Address [MAX_HOSTNAMELEN-1] = '\0';
    debug (D_REQUEST, ("%02d: new request from '%s'\n", Slot-Requests, PeerName));
    /* Initialize the slot: client socket attached, all buffers empty. */
    Slot->ReqSocket = Peer;
    Slot->Flags |= REQ_REQSOCKET;
    Slot->ReqBuffer[0] = Slot->UrlBuffer [0] = Slot->DataBuffer [0] = Slot->Url [0] = '\0';
    Slot->ReqSent = Slot->ReqRecv = Slot->UrlSent = Slot->UrlRecv =
        Slot->DataSent = Slot->DataRecv = 0;
    Slot->LastStamp = RequestStamp;
    RequestsFree--;
}
/*)) */
/*(( "ServRead ()" */
/* Server for all reads: pull request data from client sockets and document
 * data from remote connection sockets into the per-request buffers. */
void ServRead (void)
{
    int i, Bytes;
    register request_t *Req;

    for (i=0, Req=Requests; i < MaxRequests; i++, Req++)
    {
        /* Client -> proxy: collect the HTTP request. */
        if ((Req->Flags & REQ_REQSOCKET) && Net->checkread (Req->ReqSocket))
        {
            assert (Req->ReqRecv < MAX_REQBUFFER-1);
            if ( (Bytes = Net->read (Req->ReqSocket, & Req->ReqBuffer [Req->ReqRecv],
                                     MAX_REQBUFFER-1 - Req->ReqRecv)) < 0)
            {
                LogErr (Req, L_ERROR, NULL, errno, "on receiving request");
                DeleteRequest (Req, FALSE);
            }
            else
            {
                Req->LastStamp = RequestStamp;
                Req->ReqRecv += Bytes;
                Req->ReqBuffer [Req->ReqRecv] = '\0'; /* needed for strchr() (and debug) */
                debug (D_IO, ("%02d: Read %d bytes to 0x%x containing:\n'%s'\n", Req-Requests, Bytes,
                       Req->ReqRecv-Bytes, & Req->ReqBuffer [Req->ReqRecv - Bytes]));
#if defined (DEBUG) && ! defined (_M68020)
                if ((Req->ReqRecv - Bytes) & 0x0001)
                    debug (D_IO, ("(odd address...\n"));
                else
#endif
                debug (D_IO, ("= 0x %08lx %08lx %08lx %08lx\n", * (long *) (Req->ReqBuffer + Req->ReqRecv - Bytes),
                       * (long *) (Req->ReqBuffer + Req->ReqRecv - Bytes + 4), * (long *) (Req->ReqBuffer + Req->ReqRecv - Bytes + 8),
                       * (long *) (Req->ReqBuffer + Req->ReqRecv - Bytes + 12)));
                if (Bytes == 0) /* request socket gone away */
                {
                    DeleteRequest (Req, TRUE);
                    continue;
                }
                ServConnect (Req); /* Check if anything can be done already */
                if ((Req->Flags & REQ_DONE) && (Req->Flags & REQ_REQDONE) && (Req->DataRecv == 0))
                {
                    DeleteRequest (Req, TRUE); /* all done, another time */
                    continue;
                }
                if ((Req->ReqRecv == MAX_REQBUFFER-1) && ! (Req->Flags & REQ_CONNSOCKET))
                { /* buffer full and no connection yet */
                    if (Req->Flags & REQ_URLDONE)
                        Req->ReqRecv = Req->ReqSent = 0; /* Skip remaining data (we won't send it...) */
                    else
                    {
                        LogErr (Req, L_ERROR, NULL, 0, "URL buffer (size %d) overflow", MAX_REQBUFFER-1);
                        DeleteRequest (Req, FALSE);
                    }
                }
            }
        }
        /* Remote host -> proxy: collect document data (also fed to the cache). */
        if ((Req->Flags & REQ_CONNSOCKET) && Net->checkread (Req->ConnSocket))
        {
            assert (Req->DataRecv < MAX_DATABUFFER - RESERVED_DATABUFFER);
            if ( (Bytes = Net->read (Req->ConnSocket, & Req->DataBuffer [Req->DataRecv],
                                     MAX_DATABUFFER - RESERVED_DATABUFFER - Req->DataRecv)) < 0)
            {
                ErrToReq (Req, 404, errno, "Host Unreachable / read() failed",
                          "The specified host can't be contacted.<BR>\n"
                          "Please wait until it is up again or try any other URLs.",
                          NULL);
                /* above only for 0 bytes read. */
                /* ErrToData (Req, TReadErr, KeepUnfinished); */
                DeleteConnect (Req, KeepUnfinished);
            }
            else
            {
                Req->LastStamp = RequestStamp;
                /* BUGFIX: format string has three conversions but only two
                 * arguments were passed - the socket number was missing
                 * (undefined behavior in the varargs call when DEBUG is on). */
                debug (D_IO, ("%02d: Read %d bytes from Con %d\n", Req-Requests, Bytes, (int) Req->ConnSocket));
                if (Bytes == 0) /* We're done */
                {
                    DeleteConnect (Req, TRUE);
                    continue;
                }
                else if (Req->Flags & REQ_NEWCACHE)
                    CacheWrite (&Req->Cache, & Req->DataBuffer [Req->DataRecv], Bytes); /* no error checking... */
                if (Req->Flags & REQ_REQSOCKET)
                    Req->DataRecv += Bytes; /* else: only save cache data */
            }
        }
    }
}
/*)) */
/*(( "ServWrite ()" */
/* Server for all writes: push buffered document data back to the clients
 * and push the (rewritten) request on to the remote connections. */
void ServWrite (void)
{
    int i, Bytes;
    register request_t *Req;

    for (i=0, Req=Requests; i < MaxRequests; i++, Req++)
    {
        /* Proxy -> client: send document data. */
        if ((Req->Flags & REQ_REQSOCKET) && Net->checkwrite (Req->ReqSocket))
        {
            assert (Req->DataRecv > Req->DataSent);
            if ( (Bytes = Net->write (Req->ReqSocket, & Req->DataBuffer [Req->DataSent],
                                      Req->DataRecv - Req->DataSent)) < 0)
            {
                LogErr (Req, L_ERROR, NULL, errno, "on sending data");
                DeleteRequest (Req, FALSE);
                continue;
            }
            else
            {
                Req->LastStamp = RequestStamp;
                debug (D_IO, ("%02d: Wrote %d bytes to Req\n", Req-Requests, Bytes));
                Req->DataSent += Bytes;
                assert (Req->DataRecv >= Req->DataSent);
                if (Bytes == 0) /* request socket is unable to receive data */
                {
                    LogErr (Req, L_ERROR, NULL, 0, "request socket unable to receive data");
                    DeleteRequest (Req, FALSE);
                    continue;
                }
                if (Req->DataSent == Req->DataRecv) /* clear / shift data buffer */
                    Req->DataSent = Req->DataRecv = 0;
                else if (Req->DataSent > SHIFT_DATABUFFER)
                {
                    debug (D_IO, ("shifting databuffer size %d by %d bytes\n", Req->DataRecv - Req->DataSent, Req->DataSent));
                    memmove (Req->DataBuffer, & Req->DataBuffer [Req->DataSent],
                             Req->DataRecv - Req->DataSent);
                    Req->DataRecv -= Req->DataSent;
                    Req->DataSent = 0;
                }
                /* Refill from the cache when the remote side is already gone. */
                if (Req->DataRecv < MAX_DATABUFFER && (Req->Flags & REQ_DONE) && ! (Req->Flags & REQ_CONNSOCKET))
                    GetCacheData (Req);
                if (Req->DataRecv == 0 && (Req->Flags & REQ_DONE) && (Req->Flags & REQ_REQDONE)) /* We're already done */
                    DeleteRequest (Req, TRUE);
            }
        }
        /* Proxy -> remote host: send the URL line first, then the rest of the request. */
        if ((Req->Flags & REQ_CONNSOCKET) && Net->checkwrite (Req->ConnSocket))
        {
            if (Req->UrlRecv > Req->UrlSent)
                Bytes = Net->write (Req->ConnSocket, &Req->UrlBuffer [Req->UrlSent],
                                    Req->UrlRecv - Req->UrlSent);
            else
            {
                assert (Req->ReqRecv > Req->ReqSent);
                Bytes = Net->write (Req->ConnSocket, & Req->ReqBuffer [Req->ReqSent],
                                    Req->ReqRecv - Req->ReqSent);
            }
            if (Bytes < 0)
            {
                LogErr (Req, L_ERROR, NULL, errno, "send error");
                ErrToData (Req, TWriteErr, FALSE);
                DeleteConnect (Req, FALSE);
            }
            else
            {
                Req->LastStamp = RequestStamp;
                debug (D_IO, ("%02d: Wrote %d bytes to Conn from %s, 0x%x\n", Req-Requests, Bytes,
                       Req->UrlRecv > Req->UrlSent ? "UrlBuffer" : "ReqBuffer", Req->UrlRecv > Req->UrlSent ? Req->UrlSent : Req->ReqSent));
                if (Req->UrlRecv > Req->UrlSent)
                    Req->UrlSent += Bytes;
                else
                    Req->ReqSent += Bytes;
                if (Bytes == 0) /* connection socket is unable to receive url */
                {
                    /* BUGFIX: a 0-byte write does not set errno - logging the
                     * stale value was misleading; pass 0 like the analogous
                     * client-side case above. */
                    LogErr (Req, L_ERROR, NULL, 0, "connection socket unable to receive request");
                    ErrToData (Req, TWriteErr, FALSE);
                    DeleteConnect (Req, FALSE);
                    continue;
                }
                /* Shift the request buffer, keeping the last 4 sent bytes
                 * (presumably so a scan for the header terminator still works
                 * across the shift boundary - TODO confirm against CheckUrl). */
                if (Req->ReqSent > SHIFT_REQBUFFER)
                {
                    debug (D_IO, ("shifting reqbuffer size %d+4 by %d bytes\n", Req->ReqRecv - Req->ReqSent, Req->ReqSent-4));
                    memmove (Req->ReqBuffer, & Req->ReqBuffer [Req->ReqSent - 4],
                             Req->ReqRecv - Req->ReqSent + 4);
                    Req->ReqRecv -= Req->ReqSent - 4;
                    Req->ReqSent = 4;
                }
            }
        }
    }
}
/*)) */
/*(( "RequestQueued()" */
/* Initiate contact to the remote host for a queued request.
 * Builds a synthetic GET request in a free slot (no client socket attached)
 * and hands it to ServConnect().  The caller must guarantee a free slot. */
void RequestQueued (const char *Url)
{
    request_t *Req = Requests;
    int Len;

    assert (MAX_REQBUFFER >= 4 * MAX_URLSAVE); /* to be on the safe side */
    /* BUGFIX: guard the slot scan like ServServer() does - without a free
     * slot the while loop below would run off the end of the table. */
    assert (RequestsFree > 0);
    while (Req->Flags) /* any Flags set -> occupied */
        Req++;
    assert (Req - Requests < MaxRequests);
    Req->Flags = REQ_REQDONE; /* request is complete, no client socket */
    Req->ReqSent= Req->DataSent = Req->DataRecv = Req->UrlSent = Req->UrlRecv = 0;
    Req->Address[0] = Req->Url[0] = '\0';
    Req->LastStamp = RequestStamp;
    RequestsFree--;
    /* Compose "GET <url> ..." plus minimal headers ourselves. */
    Len = CreateUrlRequest ("GET", NULL, NULL, 0, Url, Req->ReqBuffer, TRUE, TRUE);
    sprintf (& Req->ReqBuffer [Len], "User-Agent: %s\r\nAccept: */*\r\n\r\n", VERSHTTP);
    Req->ReqRecv = strlen (Req->ReqBuffer);
    debug (D_QUEUE, ("initiating request '%s'\n", Req->ReqBuffer));
    ServConnect (Req);
}
/*)) */
/*(( "CheckTimeouts ()" */
/* Check whether any timeouts have occurred */
/* it is checked at least every MIN_TIMEOUT to 2*MIN_TIMEOUT seconds.
 * Thus MIN_TIMEOUT is the minimal granularity for checks.
 * Computes NextCheckStamp (= now + MIN_TIMEOUT) and a TimeoutStamp
 * (= now - TimeoutTime); any request whose LastStamp is older than
 * TimeoutStamp is timed out. */
void CheckTimeouts (void)
{
    int i;
    long CalcTime;
    request_t *Req;
    struct DateStamp TimeoutStamp; /* Timeout check Stamp */

    DateStamp (&RequestStamp);
    /* Calculate NextCheck stamp: add MIN_TIMEOUT with manual carry over
     * ticks -> minutes -> days. */
    NextCheckStamp.ds_Minute = NextCheckStamp.ds_Days = 0;
    if ( (NextCheckStamp.ds_Tick = RequestStamp.ds_Tick + (MIN_TIMEOUT % 60) * TICKS_PER_SECOND) >= 60 * TICKS_PER_SECOND)
    {
        NextCheckStamp.ds_Tick -= 60 * TICKS_PER_SECOND;
        NextCheckStamp.ds_Minute = 1;
    }
    if ( (NextCheckStamp.ds_Minute += RequestStamp.ds_Minute + (MIN_TIMEOUT / 60) % (24 * 60)) >= 24 * 60)
    {
        NextCheckStamp.ds_Minute -= 24 * 60;
        NextCheckStamp.ds_Days = 1;
    }
    NextCheckStamp.ds_Days += RequestStamp.ds_Days + MIN_TIMEOUT / (24 * 60 * 60);
    /* Calculate Timeout check stamp: subtract TimeoutTime with manual borrow. */
    /* Could use the DateStamp entries, but I don't wanna rely on the entries being signed */
    CalcTime = RequestStamp.ds_Tick - (TimeoutTime % 60) * TICKS_PER_SECOND;
    if (CalcTime < 0)
    {
        TimeoutStamp.ds_Tick = CalcTime + 60 * TICKS_PER_SECOND;
        CalcTime = -1; /* borrow from minutes */
    }
    else
    {
        TimeoutStamp.ds_Tick = CalcTime;
        CalcTime = 0;
    }
    CalcTime += RequestStamp.ds_Minute - (TimeoutTime / 60) % (24 * 60);
    if (CalcTime < 0)
    {
        TimeoutStamp.ds_Minute = CalcTime + 24 * 60;
        CalcTime = -1; /* borrow from days */
    }
    else
    {
        TimeoutStamp.ds_Minute = CalcTime;
        CalcTime = 0;
    }
    CalcTime += RequestStamp.ds_Days - TimeoutTime / (24 * 60 * 60);
    assert (CalcTime >= 0); /* kidding?!? */
    TimeoutStamp.ds_Days = CalcTime;
    /* Check requests */
    for (i=0, Req=Requests; i < MaxRequests; i++, Req++)
        if (Req->Flags)
        {
            if (CompareDates (&Req->LastStamp, &TimeoutStamp) > 0)
            {
                if (Req->Flags & REQ_CONNSOCKET)
                {
                    /* BUGFIX: typo "cancled" in user-visible message. */
                    ErrToReq (Req, 404, 0, "Timeout", "The specified host did not send any more data.<BR>\n"
                              "The connection was cancelled.",
                              NULL);
                    Req->LastStamp = RequestStamp;
                    ErrToData (Req, TTimeout, KeepUnfinished);
                    DeleteConnect (Req, KeepUnfinished);
                }
                else
                {
                    LogErr (Req, L_ERROR, NULL, 0, "Client timeout");
                    DeleteRequest (Req, FALSE);
                }
            }
        }
}
/*)) */
/*(( "CheckGetQueued ()" */
/* Check number of pending requests and initiate getting of queued URLs */
/* State machine: state 1 fetches all queued URLs that do not exist in the
 * cache yet, state 2 fetches the remaining (already cached, e.g. expired)
 * queued URLs, state 3 means the queue run is complete.  Force restarts
 * the run at state 1.
 * NOTE(review): the static cursor 'q' is not reset on Force - presumably
 * QueueNext() handles an arbitrary starting point or Force is only used
 * before/after full runs; verify against the queue module. */
void CheckGetQueued (int Force) /* Force == 1 resets getqueueud state */
{
static queue_t *q = NULL;
cachefile_t Cache = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
static int State = 1;
if (Force)
State = 1;
/* Nothing to do when finished, when queue fetching is off, or offline. */
if (State == 3 || ! GetQueued || OffLine)
return;
debug (D_QUEUE, ("Queue state %d\n", State));
/* Only use request slots above the reserved minimum (MinRequests). */
while (RequestsFree > MinRequests)
{
switch (State) {
case 1: /* get all not yet existing Urls first */
if ( (q = QueueNext (q)) )
{
if (CacheGet (&Cache, q->Url, NULL) != 1) /* no valid cache entry yet */
{
RequestQueued (q->Url);
QueueUnqueue (q);
}
break;
}
State = 2;
/* No break! */
case 2: /* second pass: fetch the remaining queued URLs */
if ( (q = QueueNext (q)) )
{
RequestQueued (q->Url);
QueueUnqueue (q);
break;
}
State = 3;
return;
default:
assert (0);
}
}
}
/*)) */
/*(( "ShutdownConnects ()" */
/* Shutdown handling: queue the URLs of unfinished cache entries for a
 * later run, then tear down every remaining remote connection. */
void ShutdownConnects (void)
{
    request_t *Req;
    int i;

    /* Queue unfinished cache entries.  Inactive requests (initiated by the
     * proxy itself, no client socket) are only requeued when
     * CacheUnreadRequests is set; active ones are always requeued. */
    for (i=0, Req=Requests; i < MaxRequests; i++, Req++)
        if (Req->Flags & REQ_NEWCACHE)
            if (Req->Url [0] && ((Req->Flags & REQ_REQSOCKET) || CacheUnreadRequests))
            {
                LogErr (Req, L_INFO, NULL, 0, "queueing URL on shutdown");
                QueueQueue (Req->Url);
            }
    /* Drop every open remote connection. */
    for (Req = Requests; Req < & Requests [MaxRequests]; Req++)
        if ((Req->Flags & REQ_CONNSOCKET))
        {
            LogErr (Req, L_INFO, NULL, 0, "removing URL on shutdown");
            ErrToData (Req, TShutdown, KeepUnfinished);
            DeleteConnect (Req, KeepUnfinished);
        }
}
/*)) */
/*(( "main ()" */
/* The main routine: choose a network protocol handler, parse the keyword
 * command line, validate the options and run the select loop forever.
 * (Kept as 'void main' in the original Amiga compiler style; all exits go
 * through ExitAll().) */
void main (int argc, char **argv)
{
    netmethods_t *Netlist[] = { &NetAmiTCP, &NetAS225, NULL }; /* all available net protocolls */
    netmethods_t **TestNet;
    char *ProxyHost = NULL;
    char *LogFile = "nil:";
    /* BUGFIX: ProxyPort must be initialized - it is passed to Init() below
     * even when the 'proxy' option was never given (was read uninitialized). */
    int ProxyPort = 0, NoHttpProxyProxy = FALSE;
    char **FullArgv = argv;

    /* Probe for the first network stack that is actually running. */
    for (TestNet = Netlist; (Net = *TestNet); TestNet++)
        if (Net->init (FALSE)) /* Check for open in nonblocking mode */
            break;
    if (! Net)
    {
        fprintf (stderr, "Couldn't open network protocol handler - perhaps the network stack is not running.\n"
                 "Available network protocol handlers:\n");
        for (TestNet = Netlist; (Net = *TestNet); TestNet++)
            fprintf (stderr, "%s\n", Net->Descr);
        fprintf (stdout, TEST_NOTCPIP "\n"); /* marker line for testAmiTCP.rexx */
        ExitAll (20);
    }
    PrgName = *argv++;
    /* A single '?' argument prints the usage template. */
    if (--argc == 1)
        if (**argv == '?')
        {
            fprintf (stderr, "Usage: %s [proxy PROXYHOST PROXYPORT] [port PORT] [cache DIR] [del SECONDS]\n"
                     "[expire SECONDS] [reload SECONDS] [timeout SECONDS] [log FILE]\n"
                     "[numreq NUMBER] [minnumreq NUMBER] [unread] [offline] [get] [keepbad]\n"
                     "[proxylocal] [noqueue] [nohttpproxyproxy] [debug LEVEL]\n\n"
                     "Debug levels (may be or'ed):\n" DEBUG_LEVELS "\n"
                     "The cache keyword will change the local directory.\n", PrgName);
            ExitAll (0);
        }
    /* Parse keyword arguments (BUGFIX throughout: "need a argument" ->
     * "need an argument" in the user-visible messages). */
    while (argc)
    {
        if (strcasecmp (*argv, "proxy") == 0)
        {
            if (argc < 3)
            {
                fprintf (stderr, "%s: need two arguments for 'proxy'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 3;
            ProxyHost = *argv++;
            if (ProxyHost)
                ProxyProxy = HttpProxyProxy = TRUE;
            if ( (ProxyPort = atoi (*argv++)) <= 0)
            {
                fprintf (stderr, "%s: Wrong second argument for 'proxy' (need the port number).\n", PrgName);
                ExitAll (1);
            }
        }
        else if (strcasecmp (*argv, "port") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'port'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            ServerPort = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "cache") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'cache'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            /* The cache directory becomes the current directory. */
            if (chdir (*argv++))
            {
                fprintf (stderr, "%s: no directory '%s': %s\n", PrgName, argv[-1], strerror (errno));
                ExitAll (1);
            }
        }
        else if (strcasecmp (*argv, "del") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'del'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            DelCacheTime = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "expire") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'expire'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            ExpireCacheTime = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "reload") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'reload'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            ReloadCacheTime = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "timeout") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'timeout'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            TimeoutTime = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "log") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'log'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            LogFile = *argv++;
        }
        else if (strcasecmp (*argv, "numreq") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'numreq'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            MaxRequests = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "minnumreq") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'minnumreq'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
            MinRequests = atoi (*argv++);
        }
        else if (strcasecmp (*argv, "unread") == 0)
        {
            argv++;
            argc--;
            CacheUnreadRequests = TRUE;
        }
        else if (strcasecmp (*argv, "keepbad") == 0)
        {
            argv++;
            argc--;
            KeepUnfinished = TRUE;
        }
        else if (strcasecmp (*argv, "offline") == 0)
        {
            argv++;
            argc--;
            OffLine = 1;
        }
        else if (strcasecmp (*argv, "get") == 0)
        {
            argv++;
            argc--;
            GetQueued = 1;
        }
        else if (strcasecmp (*argv, "proxylocal") == 0)
        {
            argv++;
            argc--;
            ProxyLocal = TRUE;
        }
        else if (strcasecmp (*argv, "noqueue") == 0)
        {
            argv++;
            argc--;
            QueueMode = FALSE;
        }
        else if (strcasecmp (*argv, "nohttpproxyproxy") == 0)
        {
            argv++;
            argc--;
            NoHttpProxyProxy = TRUE;
        }
        else if (strcasecmp (*argv, "debug") == 0)
        {
            if (argc < 2)
            {
                fprintf (stderr, "%s: need an argument for 'debug'\n", PrgName);
                ExitAll (1);
            }
            argv++;
            argc -= 2;
#ifdef DEBUG
            DebugLevel = atoi (*argv) | (1<<D_ALWAYS); /* always output error debugs. */
#endif
            argv++;
        }
        else
        {
            fprintf (stderr, "%s: unknown option '%s'\n", PrgName, *argv);
            ExitAll (1);
        }
    }
    /* Validate option combinations (BUGFIX: "nummeric" typo in message). */
    if (NoHttpProxyProxy)
        HttpProxyProxy = FALSE;
    if (MaxRequests <= MinRequests)
    {
        fprintf (stderr, "%s: argument 'numreq' %d needs numeric value > 'minnumreq' %d\n", PrgName, MaxRequests, MinRequests);
        ExitAll (1);
    }
    if (MaxRequests * 2 >= Net->FDSize - 4)
    {
        fprintf (stderr, "%s: argument 'numreq' maximum value of %d exceeded\n", PrgName, (Net->FDSize - 5) / 2);
        ExitAll (1);
    }
    if (MinRequests < 0)
    {
        fprintf (stderr, "%s: argument 'minnumreq' must be positive or zero.\n", PrgName);
        ExitAll (1);
    }
    Init (ProxyHost, ProxyPort, LogFile, FullArgv);
    /* Never return */
    for (;;)
    {
        CheckGetQueued (FALSE);
        BuildFdSets ();
        debug (D_IO, ("Select... RequestsFree %d\n", RequestsFree));
        /* Net->select() returns nonzero on a break signal -> shut down. */
        if (Net->select (ServServer, CheckTimeouts))
        {
            ShutdownConnects ();
            if (Net) /* Cleaning up may take some time... */
                Net->exit ();
            Net = NULL;
            fprintf (stderr, "%s: terminating due to signal\n", PrgName);
            LogSpecial ("terminating due to signal\n");
            ExitAll (0);
        }
        DateStamp (&RequestStamp);
        if (CompareDates (&NextCheckStamp, &RequestStamp) > 0)
            CheckTimeouts ();
        ServRead ();
        ServWrite ();
    }
}
/*)) */